In [1]:
cd ../..
/Users/shanekercheval/repos/data-science-template
In [2]:
%run "source/config/notebook_settings.py"
import os
import mlflow
from mlflow.tracking import MlflowClient
from helpsk.utility import read_pickle
import helpsk as hlp

from source.library.utilities import Timer, log_info, get_config

config = get_config()
mlflow_uri = config['MLFLOW']['URI']
log_info(f"MLFlow URI: {mlflow_uri}")

client = MlflowClient(tracking_uri=mlflow_uri)
2022-06-10 16:23:57 - INFO     | MLFlow URI: http://127.0.0.1:1234
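MlflowClient is the object-oriented interface; code that uses the fluent mlflow API instead can set the same URI globally. A minimal sketch, assuming the config value read above:

mlflow.set_tracking_uri(mlflow_uri)
mlflow.get_tracking_uri()  # 'http://127.0.0.1:1234'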

Get Latest Experiment Run from MLflow

In [3]:
# Get the model version currently in the Production stage and load its model artifact
production_model_info = client.get_latest_versions(name=config['MLFLOW']['MODEL_NAME'], stages=['Production'])
assert len(production_model_info) == 1
production_model_info = production_model_info[0]
production_model = read_pickle(client.download_artifacts(
    run_id=production_model_info.run_id,
    path='model/model.pkl'
))
log_info(f"Production Model Version: {production_model_info.version}")
2022-06-10 16:23:57 - INFO     | Production Model Version: 2
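For reference, the Production stage queried above is assigned through the model registry API. A sketch, not executed here, of how a version would be promoted (the version number is hypothetical):

# Not run here; promotes a specific registered-model version to Production.
client.transition_model_version_stage(
    name=config['MLFLOW']['MODEL_NAME'],
    version='3',  # hypothetical version to promote
    stage='Production',
)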
In [4]:
# Get the experiment and its most recent run
credit_experiment = client.get_experiment_by_name(name=config['MLFLOW']['EXPERIMENT_NAME'])
runs = client.list_run_infos(experiment_id=credit_experiment.experiment_id)
latest_run = runs[np.argmax([x.start_time for x in runs])]
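An equivalent that sorts server-side instead of via np.argmax; a sketch assuming the same client (search_runs returns Run objects whose .info matches the RunInfo used below):

latest = client.search_runs(
    experiment_ids=[credit_experiment.experiment_id],
    order_by=['attributes.start_time DESC'],
    max_results=1,
)[0]
assert latest.info.run_id == latest_run.run_id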
In [5]:
yaml_path = client.download_artifacts(run_id=latest_run.run_id, path='experiment.yaml')
results = hlp.sklearn_eval.MLExperimentResults.from_yaml_file(yaml_file_name=yaml_path)
In [6]:
# Get the best estimator found by the BayesSearchCV
best_estimator = read_pickle(client.download_artifacts(
    run_id=latest_run.run_id,
    path='model/model.pkl'
))
In [7]:
best_estimator.model
Out[7]:
Pipeline(steps=[('prep',
                 ColumnTransformer(transformers=[('numeric',
                                                  Pipeline(steps=[('imputer',
                                                                   TransformerChooser(transformer=SimpleImputer())),
                                                                  ('scaler',
                                                                   TransformerChooser()),
                                                                  ('pca',
                                                                   TransformerChooser())]),
                                                  ['duration', 'credit_amount',
                                                   'installment_commitment',
                                                   'residence_since', 'age',
                                                   'existing_credits',
                                                   'num_dependents']),
                                                 ('non_numeric',
                                                  Pipeline(steps...,
                                                                   TransformerChooser(transformer=OneHotEncoder(handle_unknown='ignore')))]),
                                                  ['checking_status',
                                                   'credit_history', 'purpose',
                                                   'savings_status',
                                                   'employment',
                                                   'personal_status',
                                                   'other_parties',
                                                   'property_magnitude',
                                                   'other_payment_plans',
                                                   'housing', 'job',
                                                   'own_telephone',
                                                   'foreign_worker'])])),
                ('model',
                 RandomForestClassifier(n_estimators=500, random_state=42))])
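The repeated TransformerChooser steps above are helpsk wrappers whose transformer parameter is itself part of the search space (see the 'imputer'/'scaler'/'pca'/'encoder' entries in best_params below); with transformer=None the step appears to act as a passthrough. A hypothetical minimal equivalent, for illustration only (PassthroughChooser is not helpsk's actual implementation):

from sklearn.base import BaseEstimator, TransformerMixin

class PassthroughChooser(BaseEstimator, TransformerMixin):
    """Illustrative sketch: delegates to `transformer` when set, otherwise passes data through."""

    def __init__(self, transformer=None):
        self.transformer = transformer

    def fit(self, X, y=None):
        if self.transformer is not None:
            self.transformer.fit(X, y)
        return self

    def transform(self, X):
        return X if self.transformer is None else self.transformer.transform(X)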

Training & Test Data Info¶

In [8]:
# download_artifacts returns the local path of the downloaded artifact
client.download_artifacts(run_id=latest_run.run_id, path='x_train.pkl')
Out[8]:
'/Users/shanekercheval/repos/data-science-template/mlflow-artifact-root/1/afe5870fdbde43d491412c67cc55fe0f/artifacts/x_train.pkl'
In [9]:
with Timer("Loading training/test datasets"):
    X_train = pd.read_pickle(client.download_artifacts(run_id=latest_run.run_id, path='x_train.pkl'))
    X_test = pd.read_pickle(client.download_artifacts(run_id=latest_run.run_id, path='x_test.pkl'))
    y_train = pd.read_pickle(client.download_artifacts(run_id=latest_run.run_id, path='y_train.pkl'))
    y_test = pd.read_pickle(client.download_artifacts(run_id=latest_run.run_id, path='y_test.pkl'))
2022-06-10 16:23:57 - INFO     | *****Timer Started: Loading training/test datasets
2022-06-10 16:23:57 - INFO     | *****Timer Finished (0.00 seconds)
In [10]:
log_info(X_train.shape)
log_info(len(y_train))

log_info(X_test.shape)
log_info(len(y_test))
2022-06-10 16:23:57 - INFO     | (800, 20)
2022-06-10 16:23:57 - INFO     | 800
2022-06-10 16:23:57 - INFO     | (200, 20)
2022-06-10 16:23:57 - INFO     | 200
In [11]:
np.unique(y_train, return_counts=True)
Out[11]:
(array([0, 1]), array([559, 241]))
In [12]:
np.unique(y_train, return_counts=True)[1] / np.sum(np.unique(y_train, return_counts=True)[1])
Out[12]:
array([0.69875, 0.30125])
In [13]:
np.unique(y_test, return_counts=True)[1] / np.sum(np.unique(y_test, return_counts=True)[1])
Out[13]:
array([0.705, 0.295])
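The same class proportions can be read more directly with pandas; a sketch assuming y_train and y_test are array-like:

pd.Series(y_train).value_counts(normalize=True)  # 0: 0.69875, 1: 0.30125
pd.Series(y_test).value_counts(normalize=True)   # 0: 0.705,   1: 0.295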

Cross Validation Results

Best Scores/Params

In [14]:
log_info(f"Best Score: {results.best_score}")
2022-06-10 16:23:57 - INFO     | Best Score: 0.7832478083744058
In [15]:
log_info(f"Best Params: {results.best_params}")
2022-06-10 16:23:57 - INFO     | Best Params: {'model': 'RandomForestClassifier()', 'imputer': 'SimpleImputer()', 'scaler': 'None', 'pca': 'None', 'encoder': 'OneHotEncoder()'}
In [16]:
# Best model from each model type.
df = results.to_formatted_dataframe(return_style=False, include_rank=True)
df["model_rank"] = df.groupby("model")["roc_auc Mean"].rank(method="first", ascending=False)
df.query('model_rank == 1')
Out[16]:
rank roc_auc Mean roc_auc 95CI.LO roc_auc 95CI.HI model C max_features max_depth n_estimators min_samples_split min_samples_leaf max_samples criterion learning_rate min_child_weight subsample colsample_bytree colsample_bylevel reg_alpha reg_lambda imputer scaler pca encoder model_rank
10 1 0.78 0.73 0.83 RandomForestClassifier() NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN SimpleImputer() None None OneHotEncoder() 1.00
0 3 0.77 0.70 0.85 LogisticRegression() NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN SimpleImputer() StandardScaler() None OneHotEncoder() 1.00
17 4 0.77 0.73 0.82 XGBClassifier() NaN NaN 5.00 1440.00 NaN NaN NaN NaN 0.01 3.00 0.67 0.91 0.53 0.04 2.33 SimpleImputer() None PCA('mle') CustomOrdinalEncoder() 1.00
5 6 0.77 0.72 0.81 ExtraTreesClassifier() NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN SimpleImputer() None None OneHotEncoder() 1.00
In [17]:
results.to_formatted_dataframe(return_style=True,
                               include_rank=True,
                               num_rows=500)
Out[17]:
rank roc_auc Mean roc_auc 95CI.LO roc_auc 95CI.HI model C max_features max_depth n_estimators min_samples_split min_samples_leaf max_samples criterion learning_rate min_child_weight subsample colsample_bytree colsample_bylevel reg_alpha reg_lambda imputer scaler pca encoder
1 0.783 0.734 0.833 RandomForestClassifier() <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> SimpleImputer() None None OneHotEncoder()
2 0.774 0.757 0.792 RandomForestClassifier() <NA> 0.566 47.000 812.000 45.000 13.000 0.830 entropy <NA> <NA> <NA> <NA> <NA> <NA> <NA> SimpleImputer(strategy='median') None None OneHotEncoder()
3 0.774 0.701 0.847 LogisticRegression() <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> SimpleImputer() StandardScaler() None OneHotEncoder()
4 0.773 0.727 0.819 XGBClassifier() <NA> <NA> 5.000 1,440.000 <NA> <NA> <NA> <NA> 0.012 3.000 0.668 0.909 0.531 0.044 2.330 SimpleImputer() None PCA('mle') CustomOrdinalEncoder()
5 0.771 0.740 0.802 XGBClassifier() <NA> <NA> 10.000 1,566.000 <NA> <NA> <NA> <NA> 0.025 3.000 0.765 0.769 0.994 0.747 2.708 SimpleImputer(strategy='most_frequent') None None CustomOrdinalEncoder()
6 0.770 0.725 0.815 ExtraTreesClassifier() <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> SimpleImputer() None None OneHotEncoder()
7 0.767 0.737 0.797 RandomForestClassifier() <NA> 0.128 87.000 839.000 39.000 40.000 0.736 gini <NA> <NA> <NA> <NA> <NA> <NA> <NA> SimpleImputer(strategy='most_frequent') None PCA('mle') OneHotEncoder()
8 0.765 0.756 0.774 XGBClassifier() <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> SimpleImputer() None None OneHotEncoder()
9 0.765 0.678 0.852 LogisticRegression() 8.754 <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> SimpleImputer() StandardScaler() None OneHotEncoder()
10 0.764 0.744 0.785 RandomForestClassifier() <NA> 0.347 66.000 756.000 10.000 21.000 0.652 entropy <NA> <NA> <NA> <NA> <NA> <NA> <NA> SimpleImputer(strategy='median') None PCA('mle') OneHotEncoder()
11 0.763 0.744 0.782 ExtraTreesClassifier() <NA> 0.372 91.000 1,353.000 14.000 9.000 0.612 entropy <NA> <NA> <NA> <NA> <NA> <NA> <NA> SimpleImputer(strategy='most_frequent') None None OneHotEncoder()
12 0.758 0.753 0.763 ExtraTreesClassifier() <NA> 0.515 95.000 1,565.000 32.000 13.000 0.773 gini <NA> <NA> <NA> <NA> <NA> <NA> <NA> SimpleImputer(strategy='most_frequent') None PCA('mle') CustomOrdinalEncoder()
13 0.758 0.729 0.786 RandomForestClassifier() <NA> 0.077 53.000 1,543.000 7.000 42.000 0.959 gini <NA> <NA> <NA> <NA> <NA> <NA> <NA> SimpleImputer() None None CustomOrdinalEncoder()
14 0.757 0.745 0.768 ExtraTreesClassifier() <NA> 0.093 7.000 1,921.000 27.000 29.000 0.771 entropy <NA> <NA> <NA> <NA> <NA> <NA> <NA> SimpleImputer(strategy='most_frequent') None PCA('mle') OneHotEncoder()
15 0.752 0.739 0.765 ExtraTreesClassifier() <NA> 0.085 20.000 519.000 48.000 32.000 0.761 entropy <NA> <NA> <NA> <NA> <NA> <NA> <NA> SimpleImputer(strategy='median') None PCA('mle') OneHotEncoder()
16 0.750 0.728 0.772 XGBClassifier() <NA> <NA> 2.000 1,840.000 <NA> <NA> <NA> <NA> 0.294 8.000 0.991 0.816 0.580 0.014 1.085 SimpleImputer(strategy='most_frequent') None PCA('mle') CustomOrdinalEncoder()
17 0.749 0.705 0.792 XGBClassifier() <NA> <NA> 4.000 1,818.000 <NA> <NA> <NA> <NA> 0.090 26.000 0.998 0.648 0.907 0.003 1.350 SimpleImputer(strategy='median') None PCA('mle') CustomOrdinalEncoder()
18 0.742 0.690 0.794 LogisticRegression() 0.014 <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> SimpleImputer(strategy='median') StandardScaler() None CustomOrdinalEncoder()
19 0.739 0.694 0.784 LogisticRegression() 0.073 <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> SimpleImputer(strategy='median') StandardScaler() PCA('mle') CustomOrdinalEncoder()
20 0.737 0.691 0.783 LogisticRegression() 0.244 <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> SimpleImputer(strategy='median') StandardScaler() PCA('mle') CustomOrdinalEncoder()
In [18]:
results.to_formatted_dataframe(query='model == "RandomForestClassifier()"', include_rank=True)
Out[18]:
rank roc_auc Mean roc_auc 95CI.LO roc_auc 95CI.HI max_features max_depth n_estimators min_samples_split min_samples_leaf max_samples criterion imputer pca encoder
1 0.783 0.734 0.833 <NA> <NA> <NA> <NA> <NA> <NA> <NA> SimpleImputer() None OneHotEncoder()
2 0.774 0.757 0.792 0.566 47.000 812.000 45.000 13.000 0.830 entropy SimpleImputer(strategy='median') None OneHotEncoder()
3 0.767 0.737 0.797 0.128 87.000 839.000 39.000 40.000 0.736 gini SimpleImputer(strategy='most_frequent') PCA('mle') OneHotEncoder()
4 0.764 0.744 0.785 0.347 66.000 756.000 10.000 21.000 0.652 entropy SimpleImputer(strategy='median') PCA('mle') OneHotEncoder()
5 0.758 0.729 0.786 0.077 53.000 1,543.000 7.000 42.000 0.959 gini SimpleImputer() None CustomOrdinalEncoder()
In [19]:
results.to_formatted_dataframe(query='model == "LogisticRegression()"', include_rank=True)
Out[19]:
rank roc_auc Mean roc_auc 95CI.LO roc_auc 95CI.HI C imputer pca encoder
1 0.774 0.701 0.847 <NA> SimpleImputer() None OneHotEncoder()
2 0.765 0.678 0.852 8.754 SimpleImputer() None OneHotEncoder()
3 0.742 0.690 0.794 0.014 SimpleImputer(strategy='median') None CustomOrdinalEncoder()
4 0.739 0.694 0.784 0.073 SimpleImputer(strategy='median') PCA('mle') CustomOrdinalEncoder()
5 0.737 0.691 0.783 0.244 SimpleImputer(strategy='median') PCA('mle') CustomOrdinalEncoder()

BayesSearchCV Performance Over Time

In [20]:
results.plot_performance_across_trials(facet_by='model').show()
In [21]:
results.plot_performance_across_trials(query='model == "RandomForestClassifier()"').show()

Variable Performance Over Time

In [22]:
results.plot_parameter_values_across_trials(query='model == "RandomForestClassifier()"').show()

Scatter Matrix

In [23]:
# results.plot_scatter_matrix(query='model == "RandomForestClassifier()"',
#                             height=1000, width=1000).show()

Variable Performance - Numeric

In [24]:
results.plot_performance_numeric_params(query='model == "RandomForestClassifier()"',
                                        height=800)
In [25]:
results.plot_parallel_coordinates(query='model == "RandomForestClassifier()"').show()

Variable Performance - Non-Numeric

In [26]:
results.plot_performance_non_numeric_params(query='model == "RandomForestClassifier()"').show()

In [27]:
results.plot_score_vs_parameter(
    query='model == "RandomForestClassifier()"',
    parameter='max_features',
    size='max_depth',
    color='encoder',
)

In [28]:
# results.plot_parameter_vs_parameter(
#     query='model == "XGBClassifier()"',
#     parameter_x='colsample_bytree',
#     parameter_y='learning_rate',
#     size='max_depth'
# )
In [29]:
# results.plot_parameter_vs_parameter(
#     query='model == "XGBClassifier()"',
#     parameter_x='colsample_bytree',
#     parameter_y='learning_rate',
#     size='imputer'
# )

Best Model - Test Set Performance

In [30]:
# `predict` here returns positive-class scores rather than labels (note the float outputs below)
test_predictions = best_estimator.predict(X_test)
test_predictions[0:10]
Out[30]:
array([0.392, 0.7  , 0.322, 0.602, 0.642, 0.63 , 0.31 , 0.36 , 0.076,
       0.388])
In [31]:
evaluator = hlp.sklearn_eval.TwoClassEvaluator(
    actual_values=y_test,
    predicted_scores=test_predictions,
    score_threshold=0.37
)
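As a sanity check, the headline metrics reported below can be reproduced with scikit-learn directly; a sketch assuming test_predictions holds positive-class probabilities:

from sklearn.metrics import confusion_matrix, roc_auc_score

roc_auc_score(y_test, test_predictions)          # ~0.773, matching the AUC row below
labels = (test_predictions >= 0.37).astype(int)  # apply the same 0.37 threshold
confusion_matrix(y_test, labels)                 # rows = actual, columns = predicted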
In [32]:
evaluator.plot_actual_vs_predict_histogram()
In [33]:
evaluator.plot_confusion_matrix()
In [34]:
evaluator.all_metrics_df(return_style=True,
                         dummy_classifier_strategy=['prior', 'constant'],
                         round_by=3)
Out[34]:
Metric Score Dummy (prior) Dummy (constant) Explanation
AUC 0.773 0.500 0.500 Area under the ROC curve (true pos. rate vs false pos. rate); ranges from 0.5 (purely random classifier) to 1.0 (perfect classifier)
True Positive Rate 0.627 0.000 1.000 62.7% of positive instances were correctly identified; i.e. 37 "Positive Class" labels were correctly identified out of 59 instances; a.k.a. Sensitivity/Recall
True Negative Rate 0.823 1.000 0.000 82.3% of negative instances were correctly identified; i.e. 116 "Negative Class" labels were correctly identified out of 141 instances
False Positive Rate 0.177 0.000 1.000 17.7% of negative instances were incorrectly identified as positive; i.e. 25 "Negative Class" labels were incorrectly identified as "Positive Class", out of 141 instances
False Negative Rate 0.373 1.000 0.000 37.3% of positive instances were incorrectly identified as negative; i.e. 22 "Positive Class" labels were incorrectly identified as "Negative Class", out of 59 instances
Positive Predictive Value 0.597 0.000 0.295 When the model claims an instance is positive, it is correct 59.7% of the time; i.e. out of the 62 times the model predicted "Positive Class", it was correct 37 times; a.k.a. precision
Negative Predictive Value 0.841 0.705 0.000 When the model claims an instance is negative, it is correct 84.1% of the time; i.e. out of the 138 times the model predicted "Negative Class", it was correct 116 times
F1 Score 0.612 0.000 0.456 The F1 score can be interpreted as a weighted average of the precision and recall, where an F1 score reaches its best value at 1 and worst score at 0.
Precision/Recall AUC 0.613 0.295 0.295 Precision/Recall AUC is calculated with `average_precision`, which summarizes a precision-recall curve as the weighted mean of precisions achieved at each threshold. See the scikit-learn documentation for caveats.
Accuracy 0.765 0.705 0.295 76.5% of instances were correctly identified
Error Rate 0.235 0.295 0.705 23.5% of instances were incorrectly identified
% Positive 0.295 0.295 0.295 29.5% of the data are positive; i.e. out of 200 total observations; 59 are labeled as "Positive Class"
Total Observations 200 200 200 There are 200 total observations; i.e. sample size
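The two dummy columns are naive baselines in the spirit of scikit-learn's DummyClassifier: 'prior' always predicts the empirical class priors, while 'constant' always predicts the positive class. A sketch of the assumed equivalents:

from sklearn.dummy import DummyClassifier
from sklearn.metrics import roc_auc_score

prior = DummyClassifier(strategy='prior').fit(X_train, y_train)
constant = DummyClassifier(strategy='constant', constant=1).fit(X_train, y_train)
roc_auc_score(y_test, prior.predict_proba(X_test)[:, 1])  # 0.5 by construction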
In [35]:
evaluator.plot_roc_auc_curve().show()
<Figure size 720x444.984 with 0 Axes>
In [36]:
evaluator.plot_precision_recall_auc_curve().show()
In [37]:
evaluator.plot_threshold_curves(score_threshold_range=(0.1, 0.7)).show()
In [38]:
evaluator.plot_precision_recall_tradeoff(score_threshold_range=(0.1, 0.6)).show()
In [39]:
evaluator.calculate_lift_gain(return_style=True)
Out[39]:
Percentile Gain Lift
5 0.12 2.37
10 0.24 2.37
15 0.36 2.37
20 0.44 2.20
25 0.53 2.10
30 0.59 1.98
35 0.66 1.89
40 0.71 1.78
45 0.71 1.58
50 0.76 1.53
55 0.78 1.42
60 0.83 1.38
65 0.86 1.33
70 0.90 1.28
75 0.93 1.24
80 0.93 1.17
85 0.97 1.14
90 0.98 1.09
95 1.00 1.05
100 1.00 1.00
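For a given percentile p, gain is the fraction of all positives captured within the top p% of observations ranked by predicted score, and lift is gain divided by p/100. For example, the top 5% here is 10 of the 200 observations, capturing roughly 7 of the 59 positives: gain ≈ 7/59 ≈ 0.12 and lift ≈ 0.12/0.05 ≈ 2.37. A sketch of that arithmetic (assumed, not necessarily helpsk's exact implementation):

order = np.argsort(-test_predictions)         # rank observations by descending score
actuals = np.asarray(y_test)[order]
top_n = int(len(actuals) * 0.05)              # top 5% -> 10 observations
gain = actuals[:top_n].sum() / actuals.sum()  # fraction of positives captured
lift = gain / 0.05                            # vs. targeting at random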

Production Model - Test Set Performance

In [40]:
# Score the test set with the production model (identical scores to the best estimator above)
test_predictions = production_model.predict(X_test)
test_predictions[0:10]
Out[40]:
array([0.392, 0.7  , 0.322, 0.602, 0.642, 0.63 , 0.31 , 0.36 , 0.076,
       0.388])
In [41]:
evaluator = hlp.sklearn_eval.TwoClassEvaluator(
    actual_values=y_test,
    predicted_scores=test_predictions,
    score_threshold=0.37
)
In [42]:
evaluator.plot_actual_vs_predict_histogram()
In [43]:
evaluator.plot_confusion_matrix()
In [44]:
evaluator.all_metrics_df(return_style=True,
                         dummy_classifier_strategy=['prior', 'constant'],
                         round_by=3)
Out[44]:
Metric Score Dummy (prior) Dummy (constant) Explanation
AUC 0.773 0.500 0.500 Area under the ROC curve (true pos. rate vs false pos. rate); ranges from 0.5 (purely random classifier) to 1.0 (perfect classifier)
True Positive Rate 0.627 0.000 1.000 62.7% of positive instances were correctly identified; i.e. 37 "Positive Class" labels were correctly identified out of 59 instances; a.k.a. Sensitivity/Recall
True Negative Rate 0.823 1.000 0.000 82.3% of negative instances were correctly identified; i.e. 116 "Negative Class" labels were correctly identified out of 141 instances
False Positive Rate 0.177 0.000 1.000 17.7% of negative instances were incorrectly identified as positive; i.e. 25 "Negative Class" labels were incorrectly identified as "Positive Class", out of 141 instances
False Negative Rate 0.373 1.000 0.000 37.3% of positive instances were incorrectly identified as negative; i.e. 22 "Positive Class" labels were incorrectly identified as "Negative Class", out of 59 instances
Positive Predictive Value 0.597 0.000 0.295 When the model claims an instance is positive, it is correct 59.7% of the time; i.e. out of the 62 times the model predicted "Positive Class", it was correct 37 times; a.k.a. precision
Negative Predictive Value 0.841 0.705 0.000 When the model claims an instance is negative, it is correct 84.1% of the time; i.e. out of the 138 times the model predicted "Negative Class", it was correct 116 times
F1 Score 0.612 0.000 0.456 The F1 score can be interpreted as a weighted average of the precision and recall, where an F1 score reaches its best value at 1 and worst score at 0.
Precision/Recall AUC 0.613 0.295 0.295 Precision/Recall AUC is calculated with `average_precision`, which summarizes a precision-recall curve as the weighted mean of precisions achieved at each threshold. See the scikit-learn documentation for caveats.
Accuracy 0.765 0.705 0.295 76.5% of instances were correctly identified
Error Rate 0.235 0.295 0.705 23.5% of instances were incorrectly identified
% Positive 0.295 0.295 0.295 29.5% of the data are positive; i.e. out of 200 total observations; 59 are labeled as "Positive Class"
Total Observations 200 200 200 There are 200 total observations; i.e. sample size
In [45]:
evaluator.plot_roc_auc_curve().show()
<Figure size 720x444.984 with 0 Axes>
In [46]:
evaluator.plot_precision_recall_auc_curve().show()
In [47]:
evaluator.plot_threshold_curves(score_threshold_range=(0.1, 0.7)).show()
In [48]:
evaluator.plot_precision_recall_tradeoff(score_threshold_range=(0.1, 0.6)).show()
In [49]:
evaluator.calculate_lift_gain(return_style=True)
Out[49]:
Percentile Gain Lift
5 0.12 2.37
10 0.24 2.37
15 0.36 2.37
20 0.44 2.20
25 0.53 2.10
30 0.59 1.98
35 0.66 1.89
40 0.71 1.78
45 0.71 1.58
50 0.76 1.53
55 0.78 1.42
60 0.83 1.38
65 0.86 1.33
70 0.90 1.28
75 0.93 1.24
80 0.93 1.17
85 0.97 1.14
90 0.98 1.09
95 1.00 1.05
100 1.00 1.00